import os
import sqlite3
import sys
from collections import OrderedDict
from datetime import datetime

import folium as fl
import geopandas as gpd
import pandas as pd
from matplotlib import pyplot as plt
from rich import print
import numpy as np
Evaluating the benefit of adding more road stations
Introduction
This document presents an analysis of the benefits of adding new road stations to the already dense network of road stations in Denmark and how this affects the performance of the glatmodel. The focus of the analysis is on a few key areas of the country where there has been a reasonably large increase in the number of stations (i.e., at least 10 new stations).
Procedure
Observational and forecast data for road temperature (TROAD) is stored in sqlite tables generated for the harp package. There are currently [station count — inline value lost in export] stations in the database.
Read different polygons to use them later to select stations from the forecast and observations
# Region polygons used later to subset stations/forecasts by area.
fyn_pol = gpd.read_file("polygons/around_fyn.geojson")
nwz_pol = gpd.read_file("polygons/around_nw_zealand.geojson")
mju_pol = gpd.read_file("polygons/somewhere_midjutland.geojson")
Match all stations in the selected regions
# Load the full station list and keep, per region, only the stations that
# fall inside the corresponding polygon (spatial "within" join).
all_stations = gpd.read_file("all_vejvejr_stations_2023.shp")
stations = OrderedDict(
    (region, gpd.sjoin(all_stations, polygon, predicate="within"))
    for region, polygon in (("fyn", fyn_pol), ("mju", mju_pol), ("nwz", nwz_pol))
)
#model = "R01"
We will be using the cycles below to read the forecast data from the road model for the indicated month and year.
# Full list of possible cycles (00-24) kept for reference; only the subset
# around the 21-03 night-time cycles is analysed here.  The original code
# built the full list and immediately overwrote it (dead assignment).
#cycles = [str(i).zfill(2) for i in range(25)]
cycles = ["21", "22", "23", "01", "02", "03"]
# Root directory with the harp FCTABLE/OBSTABLE sqlite files.
DATA = "/home/cap/Downloads/ROAD_MODEL_DATA"
hours_fcst = 6
Helper function to read the data from the sqlite files generated for harp.
def read_sqlite(dbase:str, table:str) -> pd.DataFrame:
con=sqlite3.connect(dbase)
com=f"SELECT * FROM {table}"
df = pd.read_sql(com,con)
con.close()
df["datetime"] = pd.to_datetime(df["validdate"],unit="s")
if table == "FC":
df["fcstdatetime"] = pd.to_datetime(df["fcdate"],unit="s")
# the station list I get above only identifies the first 4 digits (ie, station but not sensor)
# add this information below as a new column
df["SID_partial"] = [int(str(sid)[0:4]) for sid in df.SID]
gpd_df = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon, df.lat), crs="EPSG:4326")
return gpd_dfRead the forecast data for the selected cycles and all the observational data.
YYYY = "2022"
MM = "03"
# Forecast data keyed by cycle, one OrderedDict per model.
fcst_data_glat = OrderedDict()
fcst_data_R01 = OrderedDict()
for cycle in cycles:
    # Same read logic for both models (original duplicated the branch);
    # keep glatmodel first so the "Reading ..." output order is unchanged.
    for model, store in (("glatmodel", fcst_data_glat), ("R01", fcst_data_R01)):
        fname = os.path.join(DATA, "FCTABLE_DATA", model, f"FCTABLE_TROAD_{YYYY}{MM}_{cycle}.sqlite")
        if os.path.isfile(fname):
            print(f"Reading {fname}")
            store[cycle] = read_sqlite(fname, "FC")
        else:
            # Abort: a missing cycle file would silently bias the comparison.
            print(f"ERROR: {fname} does not exist!")
            sys.exit(1)
obs_data = read_sqlite(os.path.join(DATA,f"OBSTABLE_TROAD_{YYYY}.sqlite"),"SYNOP")
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/glatmodel/FCTABLE_TROAD_202203_21.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/R01/FCTABLE_TROAD_202203_21.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/glatmodel/FCTABLE_TROAD_202203_22.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/R01/FCTABLE_TROAD_202203_22.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/glatmodel/FCTABLE_TROAD_202203_23.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/R01/FCTABLE_TROAD_202203_23.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/glatmodel/FCTABLE_TROAD_202203_01.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/R01/FCTABLE_TROAD_202203_01.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/glatmodel/FCTABLE_TROAD_202203_02.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/R01/FCTABLE_TROAD_202203_02.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/glatmodel/FCTABLE_TROAD_202203_03.sqlite
Reading /home/cap/Downloads/ROAD_MODEL_DATA/FCTABLE_DATA/R01/FCTABLE_TROAD_202203_03.sqlite
Select only a subset for each domain. Note this is selecting ALL stations inside the corresponding polygon, irrespective of creation date.
fcst_sel_glat = OrderedDict()
fcst_sel_R01 = OrderedDict()
obs_sel = OrderedDict()
for key in stations.keys():
st_group = stations[key]["SID"].to_list()
obs_sel[key] = obs_data[obs_data["SID_partial"].isin(st_group)]
for cycle in cycles:
fcst_sel_glat[key+"_"+cycle] = fcst_data_glat[cycle][fcst_data_glat[cycle]["SID_partial"].isin(st_group)]
if len(fcst_sel_glat[key+"_"+cycle]) == 0:
print(f"No stations found in {key} list for glatmodel")
fcst_sel_R01[key+"_"+cycle] = fcst_data_R01[cycle][fcst_data_R01[cycle]["SID_partial"].isin(st_group)]
if len(fcst_sel_R01[key+"_"+cycle]) == 0:
print(f"No stations found in {key} list for glatmodel")There was an increase on the number of stations on Fyn on Jan 2022 and on the other two regions in January 2023
Below we select a few example dates, based on the increase in station availability, and plot the corresponding distributions. We start with stations on Fyn. Select the forecast data for the given date and the first 6 hours of the forecast for a given cycle (in this case 21). We first check the day before the increase in station data: 20220302
sel_date = datetime(2022,3,2,21)
pol_hour = "fyn_21"
plt_fcst_R01 = OrderedDict()
plt_fcst_glat = OrderedDict()
for leadtime in range(1,hours_fcst):
plt_fcst_R01[leadtime] = fcst_sel_R01[pol_hour][(fcst_sel_R01[pol_hour]["fcstdatetime"] == sel_date)&(fcst_sel_R01[pol_hour]["leadtime"] == leadtime)]
plt_fcst_glat[leadtime] = fcst_sel_glat[pol_hour][(fcst_sel_glat[pol_hour]["fcstdatetime"] == sel_date)&(fcst_sel_glat[pol_hour]["leadtime"] == leadtime)]The data is selected separately for each leadtime:
# Show the leadtime-1 glatmodel forecasts.  Renamed `sum` -> `summary`:
# the original shadowed the builtin sum().
summary = plt_fcst_glat[1][["fcstdatetime","leadtime","glatmodel_det"]].to_string(index=False)
print(summary)
#summary = plt_fcst_glat[2][["fcstdatetime","leadtime","glatmodel_det"]].to_string(index=False)
#print(sum)fcstdatetime leadtime glatmodel_det 2022-03-02 21:00:00 1 1.01 2022-03-02 21:00:00 1 1.20 2022-03-02 21:00:00 1 0.23 2022-03-02 21:00:00 1 0.31 2022-03-02 21:00:00 1 0.55 2022-03-02 21:00:00 1 0.56 2022-03-02 21:00:00 1 -0.46 2022-03-02 21:00:00 1 -1.17 2022-03-02 21:00:00 1 -0.57 2022-03-02 21:00:00 1 0.30 2022-03-02 21:00:00 1 -0.50 2022-03-02 21:00:00 1 0.00 2022-03-02 21:00:00 1 0.43 2022-03-02 21:00:00 1 0.14 2022-03-02 21:00:00 1 0.16 2022-03-02 21:00:00 1 0.24 2022-03-02 21:00:00 1 1.28 2022-03-02 21:00:00 1 1.44 2022-03-02 21:00:00 1 0.51 2022-03-02 21:00:00 1 0.84 2022-03-02 21:00:00 1 -0.28 2022-03-02 21:00:00 1 1.74 2022-03-02 21:00:00 1 1.70 2022-03-02 21:00:00 1 -0.99 2022-03-02 21:00:00 1 -1.29 2022-03-02 21:00:00 1 -0.04 2022-03-02 21:00:00 1 1.35 2022-03-02 21:00:00 1 -0.56 2022-03-02 21:00:00 1 0.29 2022-03-02 21:00:00 1 0.80 2022-03-02 21:00:00 1 0.38 2022-03-02 21:00:00 1 0.00 2022-03-02 21:00:00 1 -0.09 2022-03-02 21:00:00 1 1.56 2022-03-02 21:00:00 1 2.42 2022-03-02 21:00:00 1 0.41 2022-03-02 21:00:00 1 -0.30 2022-03-02 21:00:00 1 0.49 2022-03-02 21:00:00 1 -0.45 2022-03-02 21:00:00 1 0.15 2022-03-02 21:00:00 1 0.00 2022-03-02 21:00:00 1 1.10 2022-03-02 21:00:00 1 0.53 2022-03-02 21:00:00 1 0.24 2022-03-02 21:00:00 1 0.15 2022-03-02 21:00:00 1 -0.04 2022-03-02 21:00:00 1 1.18 2022-03-02 21:00:00 1 0.35 2022-03-02 21:00:00 1 0.63 2022-03-02 21:00:00 1 0.25
Now select the corresponding data for the observations. This is repeated for each leadtime, but the station set should not change.
pol_name="fyn"
plt_obs = OrderedDict()
for leadtime in range(1,hours_fcst):
sel_dates = plt_fcst_R01[leadtime]["datetime"].to_list()
plt_obs[leadtime] = obs_sel[pol_name][obs_sel[pol_name]["datetime"].isin(sel_dates)]Plot them one after the other, for each leadtime together with the observations.
# Density plots per leadtime: glatmodel vs R01 vs observations, day before.
date_str = datetime.strftime(datetime(2022,3,2,21),"%Y-%m-%d")
stds=[]
means=[]
fig = plt.figure(layout="constrained",figsize=(10,6))
fig.suptitle(f'Evolution of the distribution of forecast observations on {date_str} for cycle 21', fontsize=16)
# NOTE(review): mosaic has 6 panels but the loop below covers leadtimes
# 1..hours_fcst-1 (5 of them), so panel "6" stays empty.
mosaic=[["1","2","3"],["4","5","6"]]
ax_dict=fig.subplot_mosaic(mosaic)
for key in plt_fcst_R01.keys():
    #stds.append(plt_fcst[key][model+"_det"].std())
    #means.append(plt_fcst[key][model+"_det"].mean())
    # Only warns on empty R01 data; plotting is attempted regardless.
    if len(plt_fcst_R01[key]["R01_det"]) == 0:
        print(f"There is no data in {key}")
    p1=plt_fcst_glat[key]["glatmodel_det"].plot.density(legend=key,ax = ax_dict[str(key)])
    plt_fcst_R01[key]["R01_det"].plot.density(legend=key,ax = ax_dict[str(key)])
    plt_obs[key]["TROAD"].plot.density(legend=key,ax = ax_dict[str(key)])
    # Legend order matches plot order: glat, R01, obs.
    p1.legend([f"hour {key} glat",f"hour {key} R01", "obs"])
#plt.xlim([-4,4])
Now we check the day when the data increased: 20220303, same cycle
sel_date = datetime(2022,3,3,21)
pol_hour = "fyn_21"
plt_fcst_R01 = OrderedDict()
plt_fcst_glat = OrderedDict()
for leadtime in range(1,hours_fcst):
plt_fcst_R01[leadtime] = fcst_sel_R01[pol_hour][(fcst_sel_R01[pol_hour]["fcstdatetime"] == sel_date)&(fcst_sel_R01[pol_hour]["leadtime"] == leadtime)]
plt_fcst_glat[leadtime] = fcst_sel_glat[pol_hour][(fcst_sel_glat[pol_hour]["fcstdatetime"] == sel_date)&(fcst_sel_glat[pol_hour]["leadtime"] == leadtime)]
pol_name="fyn"
plt_obs = OrderedDict()
for leadtime in range(1,hours_fcst):
sel_dates = plt_fcst_R01[leadtime]["datetime"].to_list()
plt_obs[leadtime] = obs_sel[pol_name][obs_sel[pol_name]["datetime"].isin(sel_dates)]Plot now the same distributions for the day after.
# Density plots per leadtime for the day after (R01 plotted first here).
stds=[]
means=[]
fig = plt.figure(layout="constrained",figsize=(10,6))
date_str = datetime.strftime(sel_date,"%Y-%m-%d")
fig.suptitle(f'Evolution of the distribution of forecast observations on {date_str} for cycle 21', fontsize=16)
mosaic=[["1","2","3"],["4","5","6"]]
ax_dict=fig.subplot_mosaic(mosaic)
for key in plt_fcst_R01.keys():
    #stds.append(plt_fcst_R01[key][model+"_det"].std())
    #means.append(plt_fcst[key][model+"_det"].mean())
    if len(plt_fcst_R01[key]["R01_det"]) == 0:
        print(f"There is no data for R01 in {key}")
    p1=plt_fcst_R01[key]["R01_det"].plot.density(legend=key,ax = ax_dict[str(key)])
    plt_fcst_glat[key]["glatmodel_det"].plot.density(legend=key,ax = ax_dict[str(key)])
    plt_obs[key]["TROAD"].plot.density(legend=key,ax = ax_dict[str(key)])
    # Legend order matches plot order: R01, glatmodel, obs.
    p1.legend([f"hour {key} R01",f"hour {key} glatmodel", "obs"])
#stats=pd.DataFrame({"leadtime":[1,2,3,4,5,6],"std":stds,"mean":means})
#print("Means and standard deviations")
#print(stats[["leadtime","mean","std"]])
Creating a function to repeat this process:
def set_plot_fcst(sel_date, pol_hour, pol_name, fcst_sel, n_hours=None):
    """Select forecasts issued at sel_date, one DataFrame per leadtime.

    Parameters
    ----------
    sel_date : datetime
        Forecast issue time matched against the "fcstdatetime" column.
    pol_hour : str
        Key "<region>_<cycle>" into fcst_sel.
    pol_name : str
        Unused; kept so the signature parallels set_plot_obs.
    fcst_sel : mapping str -> DataFrame
        Region/cycle forecast subsets.
    n_hours : int, optional
        Exclusive upper bound for leadtimes; defaults to the module-level
        hours_fcst (generalizes the previous hard-coded global).

    Returns
    -------
    OrderedDict mapping leadtime -> matching rows of fcst_sel[pol_hour].
    """
    if n_hours is None:
        n_hours = hours_fcst
    frame = fcst_sel[pol_hour]  # hoist the dict lookup out of the loop
    plt_fcst = OrderedDict()
    for leadtime in range(1, n_hours):
        plt_fcst[leadtime] = frame[(frame["fcstdatetime"] == sel_date) & (frame["leadtime"] == leadtime)]
        if len(plt_fcst[leadtime]) == 0:
            print(f"Nothing in dataframe for {leadtime}")
            print(fcst_sel[pol_hour])
    return plt_fcst
def set_plot_obs(sel_date, pol_hour, pol_name, plt_fcst, obs=None, n_hours=None):
    """Select observations matching the valid times of each forecast leadtime.

    Parameters
    ----------
    sel_date, pol_hour
        Unused; kept so the signature parallels set_plot_fcst.
    pol_name : str
        Region key into the observation mapping.
    plt_fcst : mapping int -> DataFrame
        Output of set_plot_fcst; its "datetime" column gives the valid times.
    obs : mapping str -> DataFrame, optional
        Observation subsets per region; defaults to the module-level obs_sel
        (generalizes the previous hard-coded global).
    n_hours : int, optional
        Exclusive upper bound for leadtimes; defaults to module-level hours_fcst.

    Returns
    -------
    OrderedDict mapping leadtime -> observation rows at the forecast valid times.
    """
    if obs is None:
        obs = obs_sel
    if n_hours is None:
        n_hours = hours_fcst
    region_obs = obs[pol_name]
    plt_obs = OrderedDict()
    for leadtime in range(1, n_hours):
        valid_times = plt_fcst[leadtime]["datetime"].to_list()
        plt_obs[leadtime] = region_obs[region_obs["datetime"].isin(valid_times)]
        if len(plt_obs[leadtime]) == 0:
            print(f"No data in {leadtime}")
    return plt_obs
def plot_dist(plt_fcst,plt_obs,sel_date,cycle,model):
    """Plot per-leadtime density curves of model forecasts vs observations.

    plt_fcst/plt_obs are the OrderedDicts from set_plot_fcst/set_plot_obs;
    `model` selects the value column ("<model>_det").  Returns a DataFrame
    with the per-leadtime mean and std of the forecasts.  Draws on a new
    matplotlib figure as a side effect.
    """
    stds=[]
    means=[]
    fig = plt.figure(layout="constrained",figsize=(10,6))
    date_str = datetime.strftime(sel_date,"%Y-%m-%d")
    fig.suptitle(f'Evolution of the distribution of forecast observations on {date_str} for cycle {cycle}', fontsize=16)
    # Panel names "1".."6" must match str(leadtime) keys of plt_fcst.
    mosaic=[["1","2","3"],["4","5","6"]]
    ax_dict=fig.subplot_mosaic(mosaic)
    for key in plt_fcst.keys():
        stds.append(plt_fcst[key][model+"_det"].std())
        means.append(plt_fcst[key][model+"_det"].mean())
        p1=plt_fcst[key][model+"_det"].plot.density(legend=key,ax = ax_dict[str(key)])
        plt_obs[key]["TROAD"].plot.density(legend=key,ax = ax_dict[str(key)])
        p1.legend([f"hour {key}", "obs"])
    # Leadtimes taken from the dict keys, not hard-coded 1..6.
    hours = [key for key in plt_fcst.keys()]
    #stats=pd.DataFrame({"leadtime":[1,2,3,4,5,6],"std":stds,"mean":means})
    stats=pd.DataFrame({"leadtime":hours,"std":stds,"mean":means})
    return stats
def plot_cum_dist(plt_fcst,plt_obs,sel_date,cycle,model):
    """Plot per-leadtime empirical CDFs of forecasts vs observations.

    For every leadtime the values are binned (pd.cut, 20 bins), the binned
    PDFs are accumulated into CDFs for plotting, and a two-sample
    Kolmogorov-Smirnov test is printed.  Draws on a new matplotlib figure
    as a side effect; no return value.
    """
    # Hoisted out of the loop (was re-imported every iteration); kept local
    # so the name does not clash with the `stats` DataFrames used elsewhere.
    from scipy import stats as sps
    fig = plt.figure(layout="constrained",figsize=(10,6))
    date_str = datetime.strftime(sel_date,"%Y-%m-%d")
    fig.suptitle(f'Evolution of the CDFs on {date_str} for cycle {cycle}', fontsize=16)
    mosaic=[["1","2","3"],["4","5","6"]]
    ax_dict=fig.subplot_mosaic(mosaic)
    nbins=20
    print(ax_dict.keys())
    for key in plt_fcst.keys():
        obs_sample = plt_obs[key]["TROAD"]
        fcst_sample = plt_fcst[key][model+"_det"]
        # Binned counts -> normalized PDF -> cumulative CDF, for plotting.
        counts_obs = pd.cut(obs_sample,nbins).value_counts().sort_index().values
        _, bins_obs = pd.cut(obs_sample, nbins, retbins=True)
        counts_fcst = pd.cut(fcst_sample,nbins).value_counts().sort_index().values
        _, bins_fcst = pd.cut(fcst_sample, nbins, retbins=True)
        cdf_fcst = np.cumsum(counts_fcst/np.sum(counts_fcst))
        cdf_obs = np.cumsum(counts_obs/np.sum(counts_obs))
        # Plot against the right bin edges.
        ax_dict[str(key)].plot(bins_fcst[1:],cdf_fcst,color="red",label="CDF forecast")
        ax_dict[str(key)].plot(bins_obs[1:],cdf_obs,color="blue",label="CDF observations")
        ax_dict[str(key)].legend([f"forecast {key}", "obs"])
        # Bug fix: ks_2samp must be fed the raw samples, not the binned CDF
        # values (the original compared the CDF arrays as if they were data).
        D, p_value = sps.ks_2samp(fcst_sample, obs_sample)
        print(f"KS test for checking if distributions are different. D={D}, p_value ={p_value} (only similar if p_value > 0.05)")
#return stats
Selecting all the other cycles
# Issue times (cycle 22) on the day before / after the Fyn station increase.
date_before=datetime(2022,3,2,22)
date_after=datetime(2022,3,3,22)
pol_name="fyn"
Plotting for cycle 22, day before and day after.
# Cycle 22: distributions before and after the station increase over Fyn.
pol_hour="fyn_22"
cycle=pol_hour.split("_")[1]  # "22"
# Day before.
plt_fcst_R01 = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_before,pol_hour,pol_name,plt_fcst_R01)
stats_R01 = plot_dist(plt_fcst_R01,plt_obs,date_before,cycle,"R01")
stats_glat = plot_dist(plt_fcst_glat,plt_obs,date_before,cycle,"glatmodel")
# Day after.
plt_fcst_R01 = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_after,pol_hour,pol_name,plt_fcst_R01)
stats_R01 = plot_dist(plt_fcst_R01,plt_obs,date_after,cycle,"R01")
stats_glat = plot_dist(plt_fcst_glat,plt_obs,date_after,cycle,"glatmodel")
# CDFs + KS test only for the day after.
plot_cum_dist(plt_fcst_R01,plt_obs,date_after,cycle,"R01")
plot_cum_dist(plt_fcst_glat,plt_obs,date_after,cycle,"glatmodel")
dict_keys(['1', '2', '3', '4', '5', '6'])
KS test for checking if distributions are differend. D=0.35, p_value =0.17453300569806826 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.35, p_value =0.17453300569806826 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.35, p_value =0.17453300569806826 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.25, p_value =0.571336004933722 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.2, p_value =0.8319696107963263 (only similar if p_value > 0.05)
dict_keys(['1', '2', '3', '4', '5', '6'])
KS test for checking if distributions are differend. D=0.15, p_value =0.9831368772656193 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.2, p_value =0.8319696107963263 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.25, p_value =0.571336004933722 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.3, p_value =0.33559098126008213 (only similar if p_value > 0.05)
KS test for checking if distributions are differend. D=0.2, p_value =0.8319696107963263 (only similar if p_value > 0.05)
Plotting for cycle 23, day before and day after.
# Cycle 23: same before/after comparison.
date_before=datetime(2022,3,2,23)
date_after=datetime(2022,3,3,23)
pol_hour="fyn_23"
cycle=pol_hour.split("_")[1]
# Day before.
plt_fcst_R01 = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_before,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_before,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_before,cycle,"glatmodel")
# Day after.
plt_fcst_R01 = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_after,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_after,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_after,cycle,"glatmodel")
Plotting for cycle 01, day before and day after.
# Cycle 01: same before/after comparison.
date_before=datetime(2022,3,2,1)
date_after=datetime(2022,3,3,1)
pol_hour="fyn_01"
cycle=pol_hour.split("_")[1]
# Day before.
plt_fcst_R01 = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_before,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_before,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_before,cycle,"glatmodel")
# Day after.
plt_fcst_R01 = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_after,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_after,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_after,cycle,"glatmodel")
Plotting for cycle 02, day before and day after.
# Cycle 02: same before/after comparison.
date_before=datetime(2022,3,2,2)
date_after=datetime(2022,3,3,2)
pol_hour="fyn_02"
cycle=pol_hour.split("_")[1]
# Day before.
plt_fcst_R01 = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_before,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_before,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_before,cycle,"glatmodel")
# Day after.
plt_fcst_R01 = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_after,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_after,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_after,cycle,"glatmodel")
Plotting for cycle 03, day before and day after.
# Cycle 03: same before/after comparison.
date_before=datetime(2022,3,2,3)
date_after=datetime(2022,3,3,3)
pol_hour="fyn_03"
cycle=pol_hour.split("_")[1]
# Day before.
plt_fcst_R01 = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_before,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_before,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_before,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_before,cycle,"glatmodel")
# Day after.
plt_fcst_R01 = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_R01)
plt_fcst_glat = set_plot_fcst(date_after,pol_hour,pol_name,fcst_sel_glat)
plt_obs = set_plot_obs(date_after,pol_hour,pol_name,plt_fcst_R01)
stats_R01=plot_dist(plt_fcst_R01,plt_obs,date_after,cycle,"R01")
stats_glat=plot_dist(plt_fcst_glat,plt_obs,date_after,cycle,"glatmodel")
#fig = plt.figure(figsize=(10,6))
#plt.plot([1,2,3,4,5,6],stds)